x86: Fix a few on_{selected,each}_cpus callers who should wait for completion.
author Keir Fraser <keir.fraser@citrix.com>
Thu, 13 May 2010 08:44:20 +0000 (09:44 +0100)
committer Keir Fraser <keir.fraser@citrix.com>
Thu, 13 May 2010 08:44:20 +0000 (09:44 +0100)
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/acpi/cpufreq/cpufreq.c
xen/arch/x86/acpi/cpufreq/powernow.c
xen/arch/x86/cpu/mcheck/mce.c

index 6948d7d816a348584d3a6efc2c1ee2c84f33b9de..c0b2d94d6534decb036ad0554b8d4ee4d4da3e02 100644 (file)
@@ -195,7 +195,7 @@ static void drv_write(struct drv_cmd *cmd)
         cpu_isset(smp_processor_id(), cmd->mask))
         do_drv_write((void *)cmd);
     else
-        on_selected_cpus(&cmd->mask, do_drv_write, cmd, 0);
+        on_selected_cpus(&cmd->mask, do_drv_write, cmd, 1);
 }
 
 static u32 get_cur_val(cpumask_t mask)
index 27b53372d2e12b510eb65df5764dcf7646837ae0..18538fdc49cc7c7d82c9a1a9485bda27c172dda2 100644 (file)
@@ -137,7 +137,7 @@ static int powernow_cpufreq_target(struct cpufreq_policy *policy,
     cmd.val = next_perf_state;
     cmd.turbo = policy->turbo;
 
-    on_selected_cpus(&cmd.mask, transition_pstate, &cmd, 0);
+    on_selected_cpus(&cmd.mask, transition_pstate, &cmd, 1);
 
     for_each_cpu_mask(j, online_policy_cpus)
         cpufreq_statistic_update(j, perf->state, next_perf_state);
index 33e5bcb4806e3a1add4e043050de32fa6ede23bb..cf0ca5fec42069e78c4c9f142dc0ca389bb0e734 100644 (file)
@@ -1421,7 +1421,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u_xen_mc)
                add_taint(TAINT_ERROR_INJECT);
 
         if ( mce_broadcast )
-            on_each_cpu(x86_mc_mceinject, mc_mceinject, 0);
+            on_each_cpu(x86_mc_mceinject, mc_mceinject, 1);
         else
             on_selected_cpus(cpumask_of(target), x86_mc_mceinject,
                   mc_mceinject, 1);